Cleanup mov to CR4 handling.
author: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 14 Jul 2005 08:00:35 +0000 (08:00 +0000)
committer: kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 14 Jul 2005 08:00:35 +0000 (08:00 +0000)
Signed-off-by: Arun Sharma <arun.sharma@intel.com>
xen/arch/x86/vmx.c
xen/include/asm-x86/vmx.h

index cc480ab05aa15a869b4ac20a982ff2be295cf993..5f4d048dc4a4c992162a248a340945b410ee34ca 100644 (file)
@@ -801,11 +801,7 @@ vmx_world_restore(struct vcpu *d, struct vmx_assist_context *c)
 skip_cr3:
 
     error |= __vmread(CR4_READ_SHADOW, &old_cr4);
-#if defined (__i386__)
-    error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE));
-#else
-    error |= __vmwrite(GUEST_CR4, (c->cr4 | X86_CR4_VMXE | X86_CR4_PAE));
-#endif
+    error |= __vmwrite(GUEST_CR4, (c->cr4 | VMX_CR4_HOST_MASK));
     error |= __vmwrite(CR4_READ_SHADOW, c->cr4);
 
     error |= __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
@@ -1178,13 +1174,10 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
     {
         /* CR4 */
         unsigned long old_guest_cr;
-        unsigned long pae_disabled = 0;
 
         __vmread(GUEST_CR4, &old_guest_cr);
         if (value & X86_CR4_PAE){
             set_bit(VMX_CPU_STATE_PAE_ENABLED, &d->arch.arch_vmx.cpu_state);
-            if(!vmx_paging_enabled(d))
-                pae_disabled = 1;
         } else {
             if (test_bit(VMX_CPU_STATE_LMA_ENABLED,
                          &d->arch.arch_vmx.cpu_state)){
@@ -1194,11 +1187,8 @@ static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
         }
 
         __vmread(CR4_READ_SHADOW, &old_cr);
-        if (pae_disabled)
-            __vmwrite(GUEST_CR4, value| X86_CR4_VMXE);
-        else
-            __vmwrite(GUEST_CR4, value| X86_CR4_VMXE);
 
+        __vmwrite(GUEST_CR4, value| VMX_CR4_HOST_MASK);
         __vmwrite(CR4_READ_SHADOW, value);
 
         /*
index eeffcf32cf5b342d1a61e6214b493aa16b4b0b19..33a3f7fdda737a485649378f0b4a3a4942fe5fa0 100644 (file)
@@ -183,6 +183,13 @@ extern unsigned int cpu_rev;
       EXCEPTION_BITMAP_GP )
 #endif
 
+/* These bits in the CR4 are owned by the host */
+#ifdef __i386__
+#define VMX_CR4_HOST_MASK (X86_CR4_VMXE)
+#else
+#define VMX_CR4_HOST_MASK (X86_CR4_VMXE | X86_CR4_PAE)
+#endif
+
 #define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
 #define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"        /* reg/opcode: /6 */
 #define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"